This patch adds necessary support for vcontext merge between
author fred@localhost.localdomain <fred@localhost.localdomain>
Thu, 28 Jul 2005 15:16:12 +0000 (07:16 -0800)
committer fred@localhost.localdomain <fred@localhost.localdomain>
Thu, 28 Jul 2005 15:16:12 +0000 (07:16 -0800)
para and vti sides. Per Dan's good suggestion, all the changes
are refined to be ia64 specific. Accompanied by xeno patches
and a small libxc change, xen0 + xenU can still work again.

Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Fred Yang <fred.yang@intel.com>

12 files changed:
xen/arch/ia64/asm-offsets.c
xen/arch/ia64/asm-xsi-offsets.c
xen/arch/ia64/domain.c
xen/arch/ia64/process.c
xen/arch/ia64/regionreg.c
xen/arch/ia64/vcpu.c
xen/arch/ia64/xenasm.S
xen/include/asm-ia64/event.h
xen/include/asm-ia64/vcpu.h
xen/include/asm-ia64/vmx_vpd.h
xen/include/asm-ia64/xensystem.h
xen/include/public/arch-ia64.h

index 916ccc0fa635fb063981469f71fecb8bd2abc85e..f1ecc05431005ed6281c5e32d25fe741048df0c6 100644 (file)
@@ -42,29 +42,34 @@ void foo(void)
 
        BLANK();
 
-       DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
-       DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
-       DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
-       DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
-       DEFINE(XSI_IFA_OFS, offsetof(vcpu_info_t, arch.ifa));
-       DEFINE(XSI_ITIR_OFS, offsetof(vcpu_info_t, arch.itir));
-       DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
-       DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
-       DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
-       DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
-       DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
-       DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
-       DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
-       DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
-       DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
-       DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
-       DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
-       DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
-       DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
-       DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
-       DEFINE(XSI_TPR_OFS, offsetof(vcpu_info_t, arch.tpr));
-       DEFINE(XSI_PTA_OFS, offsetof (vcpu_info_t, arch.pta));
-       DEFINE(XSI_ITV_OFS, offsetof(vcpu_info_t, arch.itv));
+       DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
+       DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
+       DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
+       DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+       DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+       DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+       DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+       DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+       DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+
+       DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+       DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+       DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+       DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+       DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+       DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+       DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+       DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+       DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+       DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+       DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+       DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+       DEFINE(XSI_INCOMPL_REG_OFS, offsetof(mapped_regs_t, incomplete_regframe));
+       DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+       DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+       DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+       DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+       DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
        //DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
        //DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
        //DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
index 4951d4fbae4d7974f4af8ad2be72a5d1d5d09daf..80d5809ffd9440e9117e74f94869150578532f4b 100755 (executable)
 #define OFFSET(_sym, _str, _mem) \
     DEFINE(_sym, offsetof(_str, _mem));
 
-#ifndef CONFIG_VTI
-#define SHARED_ARCHINFO_ADDR SHAREDINFO_ADDR
-#endif
-
 void foo(void)
 {
 
        DEFINE(XSI_BASE, SHARED_ARCHINFO_ADDR);
 
-       DEFINE(XSI_PSR_I_OFS, offsetof(arch_vcpu_info_t, interrupt_delivery_enabled));
-       DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, interrupt_delivery_enabled)));
-       DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, ipsr)));
-       DEFINE(XSI_IPSR_OFS, offsetof(arch_vcpu_info_t, ipsr));
-       DEFINE(XSI_IIP_OFS, offsetof(arch_vcpu_info_t, iip));
-       DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iip)));
-       DEFINE(XSI_IFS_OFS, offsetof(arch_vcpu_info_t, ifs));
-       DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, ifs)));
-       DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(arch_vcpu_info_t, precover_ifs));
-       DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, precover_ifs)));
-       DEFINE(XSI_ISR_OFS, offsetof(arch_vcpu_info_t, isr));
-       DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, isr)));
-       DEFINE(XSI_IFA_OFS, offsetof(arch_vcpu_info_t, ifa));
-       DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, ifa)));
-       DEFINE(XSI_IIPA_OFS, offsetof(arch_vcpu_info_t, iipa));
-       DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iipa)));
-       DEFINE(XSI_IIM_OFS, offsetof(arch_vcpu_info_t, iim));
-       DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iim)));
-       DEFINE(XSI_TPR_OFS, offsetof(arch_vcpu_info_t, tpr));
-       DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, tpr)));
-       DEFINE(XSI_IHA_OFS, offsetof(arch_vcpu_info_t, iha));
-       DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iha)));
-       DEFINE(XSI_ITIR_OFS, offsetof(arch_vcpu_info_t, itir));
-       DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, itir)));
-       DEFINE(XSI_ITV_OFS, offsetof(arch_vcpu_info_t, itv));
-       DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, itv)));
-       DEFINE(XSI_PTA_OFS, offsetof(arch_vcpu_info_t, pta));
-       DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, pta)));
-       DEFINE(XSI_PSR_IC_OFS, offsetof(arch_vcpu_info_t, interrupt_collection_enabled));
-       DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, interrupt_collection_enabled)));
-       DEFINE(XSI_PEND_OFS, offsetof(arch_vcpu_info_t, pending_interruption));
-       DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, pending_interruption)));
-       DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(arch_vcpu_info_t, incomplete_regframe));
-       DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, incomplete_regframe)));
-       DEFINE(XSI_DELIV_MASK0_OFS, offsetof(arch_vcpu_info_t, delivery_mask[0]));
-       DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, delivery_mask[0])));
-       DEFINE(XSI_METAPHYS_OFS, offsetof(arch_vcpu_info_t, metaphysical_mode));
-       DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, metaphysical_mode)));
+       DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
+       DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
+       DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+       DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+       DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+       DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+       DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+       DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+       DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+       DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, precover_ifs)));
+       DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+       DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
+       DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+       DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+       DEFINE(XSI_IIPA_OFS, offsetof(mapped_regs_t, iipa));
+       DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
+       DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+       DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
+       DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+       DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
+       DEFINE(XSI_IHA_OFS, offsetof(mapped_regs_t, iha));
+       DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
+       DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+       DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+       DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
+       DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
+       DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+       DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
+       DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
+       DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
+       DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+       DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
+       DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t, incomplete_regframe));
+       DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
+       DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
+       DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, delivery_mask[0])));
+       DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+       DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
 
-       DEFINE(XSI_BANKNUM_OFS, offsetof(arch_vcpu_info_t, banknum));
-       DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, banknum)));
+       DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+       DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, banknum)));
 
-       DEFINE(XSI_BANK0_R16_OFS, offsetof(arch_vcpu_info_t, bank0_regs[0]));
-       DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, bank0_regs[0])));
-       DEFINE(XSI_BANK1_R16_OFS, offsetof(arch_vcpu_info_t, bank1_regs[0]));
-       DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, bank1_regs[0])));
-       DEFINE(XSI_RR0_OFS, offsetof(arch_vcpu_info_t, rrs[0]));
-       DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, rrs[0])));
-       DEFINE(XSI_KR0_OFS, offsetof(arch_vcpu_info_t, krs[0]));
-       DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, krs[0])));
-       DEFINE(XSI_PKR0_OFS, offsetof(arch_vcpu_info_t, pkrs[0]));
-       DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, pkrs[0])));
-       DEFINE(XSI_TMP0_OFS, offsetof(arch_vcpu_info_t, tmp[0]));
-       DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, tmp[0])));
+       DEFINE(XSI_BANK0_R16_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+       DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank0_regs[0])));
+       DEFINE(XSI_BANK1_R16_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+       DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank1_regs[0])));
+       DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+       DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
+       DEFINE(XSI_KR0_OFS, offsetof(mapped_regs_t, krs[0]));
+       DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
+       DEFINE(XSI_PKR0_OFS, offsetof(mapped_regs_t, pkrs[0]));
+       DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
+       DEFINE(XSI_TMP0_OFS, offsetof(mapped_regs_t, tmp[0]));
+       DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
        
 }
index 3bd67d6e27270a48723537b4fcae7c27a3c0aa58..0886cd70037771d69ac96e72aead2504198870f3 100644 (file)
@@ -212,6 +212,10 @@ void arch_do_createdomain(struct vcpu *v)
         */
 
        memset(d->shared_info, 0, PAGE_SIZE);
+       d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
+                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+       printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
+       memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
        v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
        /* Mask all events, and specific port will be unmasked
         * when customer subscribes to it.
@@ -232,8 +236,8 @@ void arch_do_createdomain(struct vcpu *v)
        /* FIXME: This is identity mapped address for xenheap. 
         * Do we need it at all?
         */
-       d->xen_vastart = 0xf000000000000000;
-       d->xen_vaend = 0xf300000000000000;
+       d->xen_vastart = XEN_START_ADDR;
+       d->xen_vaend = XEN_END_ADDR;
        d->arch.breakimm = 0x1000;
 }
 #else // CONFIG_VTI
@@ -252,12 +256,16 @@ void arch_do_createdomain(struct vcpu *v)
                while (1);
        }
        memset(d->shared_info, 0, PAGE_SIZE);
+       d->shared_info->vcpu_data[0].arch.privregs = 
+                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+       printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
+       memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
        v->vcpu_info = &(d->shared_info->vcpu_data[0]);
 
-       d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
+       d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
        if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
                BUG();
-       v->vcpu_info->arch.metaphysical_mode = 1;
+       VCPU(v, metaphysical_mode) = 1;
        v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
        v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
 #define DOMAIN_RID_BITS_DEFAULT 18
@@ -266,9 +274,9 @@ void arch_do_createdomain(struct vcpu *v)
        v->arch.starting_rid = d->arch.starting_rid;
        v->arch.ending_rid = d->arch.ending_rid;
        // the following will eventually need to be negotiated dynamically
-       d->xen_vastart = 0xf000000000000000;
-       d->xen_vaend = 0xf300000000000000;
-       d->shared_info_va = 0xf100000000000000;
+       d->xen_vastart = XEN_START_ADDR;
+       d->xen_vaend = XEN_END_ADDR;
+       d->shared_info_va = SHAREDINFO_ADDR;
        d->arch.breakimm = 0x1000;
        v->arch.breakimm = d->arch.breakimm;
 
@@ -292,7 +300,15 @@ void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
 
        printf("arch_getdomaininfo_ctxt\n");
        c->regs = *regs;
-       c->vcpu = v->vcpu_info->arch;
+       c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
+#if 0
+       if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
+                       v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
+               printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
+               return -EFAULT;
+       }
+#endif
+
        c->shared = v->domain->shared_info->arch;
 }
 
@@ -307,7 +323,13 @@ int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
        regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
        regs->ar_rsc |= (2 << 2); /* force PL2/3 */
 
-       v->vcpu_info->arch = c->vcpu;
+       v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
+       if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
+                          c->vcpu.privregs, sizeof(mapped_regs_t))) {
+           printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
+           return -EFAULT;
+       }
+
        init_all_rr(v);
 
        // this should be in userspace
@@ -381,8 +403,8 @@ int arch_set_info_guest(
     new_thread(v, c->guest_iip, 0, 0);
 
 
-    d->xen_vastart = 0xf000000000000000;
-    d->xen_vaend = 0xf300000000000000;
+    d->xen_vastart = XEN_START_ADDR;
+    d->xen_vaend = XEN_END_ADDR;
     d->arch.breakimm = 0x1000 + d->domain_id;
     v->arch._thread.on_ustack = 0;
 
@@ -395,7 +417,13 @@ int arch_set_info_guest(
 
 void arch_do_boot_vcpu(struct vcpu *v)
 {
+       struct domain *d = v->domain;
        printf("arch_do_boot_vcpu: not implemented\n");
+
+       d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
+                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+       printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
+       memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
        return;
 }
 
index 8ccfb042350aace9e662df287cfa7449654d0d9b..7ee9914b83ac246e0a00593a06295984f1846fb3 100644 (file)
@@ -226,7 +226,7 @@ panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%p,iip=%p,ifa=%p,isr=%p
 #ifdef CONFIG_SMP
 #error "sharedinfo doesn't handle smp yet"
 #endif
-       regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
+       regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
 
        PSCB(v,interrupt_delivery_enabled) = 0;
        PSCB(v,interrupt_collection_enabled) = 0;
index 04206edf4d6818b10a54d2790c982e0e07490574..3172c2af35a0668d2ca4eb2bf2bbb404b521de94 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/regionreg.h>
 #include <asm/vhpt.h>
 #include <asm/vcpu.h>
+extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
 
 
 #define        IA64_MIN_IMPL_RID_BITS  (IA64_MIN_IMPL_RID_MSB+1)
@@ -274,7 +275,8 @@ int set_one_rr(unsigned long rr, unsigned long val)
                newrrv.rid = newrid;
                newrrv.ve = VHPT_ENABLED_REGION_7;
                newrrv.ps = IA64_GRANULE_SHIFT;
-               ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+               ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+                               v->vcpu_info->arch.privregs);
        }
        else {
                newrrv.rid = newrid;
@@ -291,7 +293,8 @@ int set_one_rr(unsigned long rr, unsigned long val)
        newrrv.ve = 1;  // VHPT now enabled for region 7!!
        newrrv.ps = PAGE_SHIFT;
        if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
-       if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+       if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+                               v->vcpu_info->arch.privregs);
        else set_rr(rr,newrrv.rrval);
 #endif
        return 1;
index b5ff7fcffa840607a76b09b0fc2de20e485e118e..49fef157c7ead985814aaf3ef5a58952888b5612 100644 (file)
@@ -1185,12 +1185,6 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
        //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
        //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
        if (ifs & regs->cr_ifs & 0x8000000000000000L) {
-#define SI_OFS(x)      ((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
-if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
-printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
-printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
-while(1);
-}
                // TODO: validate PSCB(vcpu,iip)
                // TODO: PSCB(vcpu,ipsr) = psr;
                PSCB(vcpu,ipsr) = psr.i64;
index f04dfea8fa7b1ff5298f8d5c6418faf08e1e6d87..92b47cc9767d3bea217a0feeb134b1602d93f9d3 100644 (file)
@@ -48,10 +48,11 @@ END(platform_is_hp_ski)
 // FIXME? Note that this turns off the DB bit (debug)
 #define PSR_BITS_TO_SET        IA64_PSR_BN
 
+//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
 GLOBAL_ENTRY(ia64_new_rr7)
        // not sure this unwind statement is correct...
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-       alloc loc1 = ar.pfs, 2, 7, 0, 0
+       alloc loc1 = ar.pfs, 3, 8, 0, 0
 1:     {
          mov r28  = in0                // copy procedure index
          mov r8   = ip                 // save ip to compute branch
@@ -72,6 +73,10 @@ GLOBAL_ENTRY(ia64_new_rr7)
        ;;
        tpa loc5=loc5                   // grab this BEFORE changing rr7
        ;;
+       mov loc7=in2                    // arch_vcpu_info_t
+       ;;
+       tpa loc7=loc7                   // grab this BEFORE changing rr7
+       ;;
        mov loc3 = psr                  // save psr
        adds r8  = 1f-1b,r8             // calculate return address for call
        ;;
@@ -206,6 +211,25 @@ GLOBAL_ENTRY(ia64_new_rr7)
        ;;
        itr.d dtr[r25]=r23              // wire in new mapping...
        ;;
+       // Map for arch_vcpu_info_t
+       movl r22=SHARED_ARCHINFO_ADDR
+       ;;
+       movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
+       ;;
+       mov r21=loc7                    // saved sharedinfo physical address
+       ;;
+       or r23=r25,r21                  // construct PA | page properties
+       mov r24=PAGE_SHIFT<<2
+       ;;
+       ptr.d   r22,r24
+       ;;
+       mov cr.itir=r24
+       mov cr.ifa=r22
+       ;;
+       mov r25=IA64_TR_ARCH_INFO
+       ;;
+       itr.d dtr[r25]=r23              // wire in new mapping...
+       ;;
 
        // done, switch back to virtual and return
        mov r16=loc3                    // r16= original psr
@@ -278,12 +302,9 @@ GLOBAL_ENTRY(__get_domain_bundle)
 END(__get_domain_bundle)
 
 GLOBAL_ENTRY(dorfirfi)
-#define SI_CR_IIP_OFFSET 0x10
-#define SI_CR_IPSR_OFFSET 0x08
-#define SI_CR_IFS_OFFSET 0x18
-        movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
-        movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
-        movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
+        movl r16 = XSI_IIP
+        movl r17 = XSI_IPSR
+        movl r18 = XSI_IFS
        ;;
        ld8 r16 = [r16]
        ld8 r17 = [r17]
index 9c7693094a78aead9b834fac60b429b2a7998719..d01111be7c73d8c7edf6a4a6e9d60ac4d5961356 100644 (file)
@@ -9,11 +9,12 @@
 #ifndef __ASM_EVENT_H__
 #define __ASM_EVENT_H__
 
+#include <public/arch-ia64.h>
 #include <asm/vcpu.h>
 
 static inline void evtchn_notify(struct vcpu *v)
 {
-       vcpu_pend_interrupt(v, VCPU(v,evtchn_vector));
+       vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
 }
 
 #endif
index 5dc2452bca89ef74c6e7617e01b5b5fe50c6a1cc..e48f1e2e68fed1634c6d5ee81726075feeaee83d 100644 (file)
@@ -15,7 +15,7 @@ typedef       struct vcpu VCPU;
 
 typedef struct pt_regs REGS;
 
-#define VCPU(_v,_x)    _v->vcpu_info->arch._x
+#define VCPU(_v,_x)    _v->vcpu_info->arch.privregs->_x
 
 #define PRIVOP_ADDR_COUNT
 #ifdef PRIVOP_ADDR_COUNT
index 1a18f03661507e9ca70cfbc3cc55b09fd3cc3ca7..2b34e94fcc448cc1ebab353df6902fe61b6a960c 100644 (file)
 
 #define VPD_SHIFT      17      /* 128K requirement */
 #define VPD_SIZE       (1 << VPD_SHIFT)
-typedef union {
-       unsigned long value;
-       struct {
-               int     a_int:1;
-               int     a_from_int_cr:1;
-               int     a_to_int_cr:1;
-               int     a_from_psr:1;
-               int     a_from_cpuid:1;
-               int     a_cover:1;
-               int     a_bsw:1;
-               long    reserved:57;
-       };
-} vac_t;
-
-typedef union {
-       unsigned long value;
-       struct {
-               int     d_vmsw:1;
-               int     d_extint:1;
-               int     d_ibr_dbr:1;
-               int     d_pmc:1;
-               int     d_to_pmd:1;
-               int     d_itm:1;
-               long    reserved:58;
-       };
-} vdc_t;
 
 typedef struct {
        unsigned long   dcr;            // CR0
@@ -89,29 +63,6 @@ typedef struct {
        unsigned long   rsv6[46];
 } cr_t;
 
-typedef struct vpd {
-       vac_t                   vac;
-       vdc_t                   vdc;
-       unsigned long           virt_env_vaddr;
-       unsigned long           reserved1[29];
-       unsigned long           vhpi;
-       unsigned long           reserved2[95];
-       unsigned long           vgr[16];
-       unsigned long           vbgr[16];
-       unsigned long           vnat;
-       unsigned long           vbnat;
-       unsigned long           vcpuid[5];
-       unsigned long           reserved3[11];
-       unsigned long           vpsr;
-       unsigned long           vpr;
-       unsigned long           reserved4[76];
-       unsigned long           vcr[128];
-       unsigned long           reserved5[128];
-       unsigned long           reserved6[3456];
-       unsigned long           vmm_avail[128];
-       unsigned long           reserved7[4096];
-} vpd_t;
-
 void vmx_enter_scheduler(void);
 
 //FIXME: Map for LID to vcpu, Eddie
index 6ab57719bb50fcfb2ffac4b2ac0df8786e513f6e..376f480764f1f0801ead3cfb75ab803af46d02e9 100644 (file)
 #define XEN_RR7_SWITCH_STUB     0xb700000000000000
 #endif // CONFIG_VTI
 
+#define XEN_START_ADDR          0xf000000000000000
 #define KERNEL_START            0xf000000004000000
 #define PERCPU_ADDR             0xf100000000000000-PERCPU_PAGE_SIZE
 #define SHAREDINFO_ADDR                 0xf100000000000000
 #define VHPT_ADDR               0xf200000000000000
+#define SHARED_ARCHINFO_ADDR    0xf300000000000000
+#define XEN_END_ADDR            0xf400000000000000
 
 #ifndef __ASSEMBLY__
 
index 34e9f1c0ae59513664f9ed8b87b23da3e59fec52..3ce2a7d7e47fa352658f2abacfbafd122cc46816 100644 (file)
@@ -140,38 +140,121 @@ struct pt_regs {
        struct pt_fpreg f11;            /* scratch */
 };
 
+typedef union {
+       unsigned long value;
+       struct {
+               int     a_int:1;
+               int     a_from_int_cr:1;
+               int     a_to_int_cr:1;
+               int     a_from_psr:1;
+               int     a_from_cpuid:1;
+               int     a_cover:1;
+               int     a_bsw:1;
+               long    reserved:57;
+       };
+} vac_t;
+
+typedef union {
+       unsigned long value;
+       struct {
+               int     d_vmsw:1;
+               int     d_extint:1;
+               int     d_ibr_dbr:1;
+               int     d_pmc:1;
+               int     d_to_pmd:1;
+               int     d_itm:1;
+               long    reserved:58;
+       };
+} vdc_t;
+
 typedef struct {
-       unsigned long ipsr;
-       unsigned long iip;
-       unsigned long ifs;
-       unsigned long precover_ifs;
-       unsigned long isr;
-       unsigned long ifa;
-       unsigned long iipa;
-       unsigned long iim;
-       unsigned long unat;  // not sure if this is needed until NaT arch is done
-       unsigned long tpr;
-       unsigned long iha;
-       unsigned long itir;
-       unsigned long itv;
-       unsigned long pmv;
-       unsigned long cmcv;
-       unsigned long pta;
-       int interrupt_collection_enabled; // virtual psr.ic
-       int interrupt_delivery_enabled; // virtual psr.i
-       int pending_interruption;
-       int incomplete_regframe;        // see SDM vol2 6.8
-       unsigned long delivery_mask[4];
-       int metaphysical_mode;  // 1 = use metaphys mapping, 0 = use virtual
-       int banknum;    // 0 or 1, which virtual register bank is active
-       unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-       unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-       unsigned long rrs[8];   // region registers
-       unsigned long krs[8];   // kernel registers
-       unsigned long pkrs[8];  // protection key registers
-       unsigned long tmp[8];   // temp registers (e.g. for hyperprivops)
+       vac_t                   vac;
+       vdc_t                   vdc;
+       unsigned long           virt_env_vaddr;
+       unsigned long           reserved1[29];
+       unsigned long           vhpi;
+       unsigned long           reserved2[95];
+       union {
+         unsigned long         vgr[16];
+         unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
+       };
+       union {
+         unsigned long         vbgr[16];
+         unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
+       };
+       unsigned long           vnat;
+       unsigned long           vbnat;
+       unsigned long           vcpuid[5];
+       unsigned long           reserved3[11];
+       unsigned long           vpsr;
+       unsigned long           vpr;
+       unsigned long           reserved4[76];
+       union {
+         unsigned long         vcr[128];
+          struct {
+           unsigned long       dcr;            // CR0
+           unsigned long       itm;
+           unsigned long       iva;
+           unsigned long       rsv1[5];
+           unsigned long       pta;            // CR8
+           unsigned long       rsv2[7];
+           unsigned long       ipsr;           // CR16
+           unsigned long       isr;
+           unsigned long       rsv3;
+           unsigned long       iip;
+           unsigned long       ifa;
+           unsigned long       itir;
+           unsigned long       iipa;
+           unsigned long       ifs;
+           unsigned long       iim;            // CR24
+           unsigned long       iha;
+           unsigned long       rsv4[38];
+           unsigned long       lid;            // CR64
+           unsigned long       ivr;
+           unsigned long       tpr;
+           unsigned long       eoi;
+           unsigned long       irr[4];
+           unsigned long       itv;            // CR72
+           unsigned long       pmv;
+           unsigned long       cmcv;
+           unsigned long       rsv5[5];
+           unsigned long       lrr0;           // CR80
+           unsigned long       lrr1;
+           unsigned long       rsv6[46];
+          };
+       };
+       union {
+         unsigned long         reserved5[128];
+         struct {
+           unsigned long precover_ifs;
+           unsigned long unat;  // not sure if this is needed until NaT arch is done
+           int interrupt_collection_enabled; // virtual psr.ic
+           int interrupt_delivery_enabled; // virtual psr.i
+           int pending_interruption;
+           int incomplete_regframe;    // see SDM vol2 6.8
+           unsigned long delivery_mask[4];
+           int metaphysical_mode;      // 1 = use metaphys mapping, 0 = use virtual
+           int banknum;        // 0 or 1, which virtual register bank is active
+           unsigned long rrs[8];       // region registers
+           unsigned long krs[8];       // kernel registers
+           unsigned long pkrs[8];      // protection key registers
+           unsigned long tmp[8];       // temp registers (e.g. for hyperprivops)
+         };
+        };
+#ifdef CONFIG_VTI
+       unsigned long           reserved6[3456];
+       unsigned long           vmm_avail[128];
+       unsigned long           reserved7[4096];
+#endif
+} mapped_regs_t;
+
+typedef struct {
+       mapped_regs_t *privregs;
        int evtchn_vector;
 } arch_vcpu_info_t;
+
+typedef arch_vcpu_info_t vpd_t;
+
 #define __ARCH_HAS_VCPU_INFO
 
 typedef struct {